This notebook combines the setup and image processing scripts for Omnipose, following the recommended installation procedures for Jupyter environments.
This notebook sets up the required dependencies for Omnipose using a conda virtual environment.
In [ ]:
# Environment setup for Omnipose.
#
# NOTE: `!conda activate <env>` has NO effect inside a notebook — the `!`
# prefix runs the command in a throwaway subshell, so the running kernel's
# environment is unchanged. Create and activate the environment in a
# terminal BEFORE launching Jupyter:
#
#   conda create --name omnipose_env python=3.9.13
#   conda activate omnipose_env
#   pip install omnipose
#
# If you must install from inside the notebook, use the %pip / %conda
# magics (not !pip / !conda) so the install targets the kernel's own
# environment:
# %conda install -c conda-forge pytest pathlib scikit-image
# Note: 'omnipose' might need to be installed through pip if it's not
# available in conda channels:
# %pip install omnipose
The following section contains the Omnipose image processing code.
In [ ]:
# Import dependencies
import numpy as np
from cellpose_omni import models, core
# This checks to see if you have set up your GPU properly.
# CPU performance is a lot slower, but not a problem if you
# are only processing a few images.
use_GPU = core.use_gpu()
print('>>> GPU activated? {}'.format(use_GPU))
# for plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['figure.dpi'] = 300  # high-resolution inline figures
plt.style.use('dark_background')  # dark theme for all subsequent plots
c:\Users\MicrobeJ\anaconda3\envs\omnipose_env\Lib\site-packages\tqdm\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html from .autonotebook import tqdm as notebook_tqdm
2023-08-30 10:51:24,480 [INFO] TORCH GPU version not installed/working. >>> GPU activated? False >>> GPU activated? False
Now that everything is set up, I need to load my images. This can be done in a variety of ways; here we save them in the variable `files` by specifying a base directory and then matching file names against an extension/keyword pattern.
In [ ]:
from pathlib import Path

# NOTE(review): hardcoded absolute Windows path — consider making this a
# configurable constant (e.g. DATA_DIR) so the notebook runs elsewhere.
basedir = r'C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence'

# sorted() makes the processing order deterministic: Path.glob yields files
# in arbitrary, OS-dependent order, which would otherwise shuffle the
# correspondence between `files`, `imgs`, and the saved outputs across runs.
# NOTE(review): this pattern only matches files named 'MAX...' because
# globbing is case-insensitive on Windows; on Linux/macOS 'Max.tif*' would
# match nothing — confirm the intended casing.
files = sorted(str(p) for p in Path(basedir).glob("Max.tif*"))

from cellpose_omni import io, transforms
from omnipose.utils import normalize99

# Load every matched image into memory.
imgs = [io.imread(f) for f in files]

# Print basic info (shape, dtype, intensity range) about each raw image.
for i in imgs:
    print('Original image shape:',i.shape)
    print('data type:',i.dtype)
    print('data range: min {}, max {}\n'.format(i.min(),i.max()))
nimg = len(imgs)
print('\nnumber of images:',nimg)

fig = plt.figure(figsize=[40]*2,frameon=False) # initialize figure
print('\n')
# Preprocess each image in place: channel axis last, collapse multi-channel
# data to grayscale, then normalize; display each result in a subplot row.
for k in range(nimg):
    img = transforms.move_min_dim(imgs[k]) # move the channel dimension last
    if len(img.shape)>2:
        # imgs[k] = img[:,:,1] # could pick out a specific channel
        imgs[k] = np.mean(img,axis=-1) # or just turn into grayscale
    imgs[k] = normalize99(imgs[k]) # rescale intensities for display/segmentation
    # imgs[k] = np.pad(imgs[k],10,'edge') # optional edge padding
    print('new shape: ', imgs[k].shape)
    plt.subplot(1,nimg,k+1)
    plt.imshow(imgs[k],cmap='gray')
    plt.axis('off')
Original image shape: (3, 512, 512) data type: uint16 data range: min 1161, max 39445 Original image shape: (3, 512, 512) data type: uint16 data range: min 1130, max 30580 Original image shape: (3, 512, 512) data type: uint16 data range: min 1121, max 28729 Original image shape: (3, 512, 512) data type: uint16 data range: min 1072, max 28934 Original image shape: (3, 512, 512) data type: uint16 data range: min 1117, max 28229 Original image shape: (3, 512, 512) data type: uint16 data range: min 1131, max 29899 Original image shape: (3, 512, 512) data type: uint16 data range: min 1096, max 32924 Original image shape: (3, 512, 512) data type: uint16 data range: min 1073, max 31544 Original image shape: (3, 512, 512) data type: uint16 data range: min 1147, max 28697 Original image shape: (3, 512, 512) data type: uint16 data range: min 1148, max 27875 number of images: 10 new shape: (512, 512) new shape: (512, 512) new shape: (512, 512) new shape: (512, 512) new shape: (512, 512) new shape: (512, 512) new shape: (512, 512) new shape: (512, 512) new shape: (512, 512) new shape: (512, 512)
Decide which model to use
Here we list the available models and choose which one to use. I assume we will almost always use the `bact_phase_omni` or `bact_phase_cp` models.
In [ ]:
# Import dependencies for segmentation and for saving the results.
from skimage.io import imread, imsave
import numpy as np
from pathlib import Path
from cellpose_omni import models, utils, io
import time

# Initialize model
model_name = 'bact_phase_omni'  # pretrained Omnipose model for bacterial phase contrast
model = models.CellposeModel(gpu=False, model_type=model_name)

# Parameters (see the Omnipose documentation for threshold semantics)
chans = [0,0] # Segment based on first channel, no second channel
n = range(len(files)) # Segment all images
mask_threshold = -1
flow_threshold = 0
niter = None   # let the model choose the number of dynamics iterations
omni = True    # use Omnipose mask reconstruction
cluster = True
resample = True
verbose = 0
affinity_seg = 0

# Run segmentation
tic = time.time()
masks, flows, styles = model.eval(
    # NOTE(review): the loading cell reported these TIFFs as shape
    # (3, 512, 512), i.e. channel-FIRST. `[..., 0]` indexes the LAST axis,
    # so if skimage also loads them channel-first this slices a (3, 512)
    # plane rather than selecting channel 0 (which would be `imread(f)[0]`).
    # Confirm the axis order skimage.io.imread produces for these files.
    [imread(f)[..., 0] for f in files], # Reading only the first channel for segmentation
    channels=chans,
    mask_threshold=mask_threshold,
    flow_threshold=flow_threshold,
    niter=niter,
    omni=omni,
    cluster=cluster,
    resample=resample,
    verbose=verbose,
    affinity_seg=affinity_seg
)
net_time = time.time() - tic
print(f'total segmentation time: {net_time}s')

# Apply masks and save images
for idx, mask in enumerate(masks):
    original_image = imread(files[idx]) # Read the original multi-channel image
    # NOTE(review): this guard assumes channel-LAST (H, W, C) images; for the
    # channel-first (C, H, W) shapes reported earlier, shape[:2] is (C, H)
    # and the repeat below would broadcast along the wrong axis — verify.
    if original_image.shape[:2] != mask.shape:
        print(f"Dimension mismatch for {files[idx]}")
        continue
    # Extend mask dimensions to match the multi-channel image
    mask_multi_channel = np.repeat(mask[:, :, np.newaxis], original_image.shape[2], axis=2)
    # Apply the mask to all channels (zero out pixels outside any cell)
    masked_image = original_image * (mask_multi_channel > 0)
    # Save masked image and other outputs alongside the inputs
    filename = Path(files[idx]).name
    base_name = Path(filename).stem
    imsave(f"{basedir}/{base_name}_masked.tif", masked_image.astype(np.uint16))
    outlines = utils.outlines_list(mask) # per-cell outline coordinates
    io.outlines_to_text(f"{basedir}/{base_name}_outlines.txt", outlines)
    imsave(f"{basedir}/{base_name}_mask.tif", mask.astype(np.uint16))
2023-08-30 10:53:02,398 [INFO] >>bact_phase_omni<< model set to be used 2023-08-30 10:53:02,400 [INFO] >>>> using CPU 2023-08-30 10:53:03,115 [INFO] 0%| | 0/10 [00:00<?, ?it/s] 2023-08-30 10:53:07,632 [INFO] 10%|# | 1/10 [00:04<00:40, 4.52s/it] 2023-08-30 10:53:11,608 [INFO] 20%|## | 2/10 [00:08<00:33, 4.20s/it] 2023-08-30 10:53:15,451 [INFO] 30%|### | 3/10 [00:12<00:28, 4.04s/it] 2023-08-30 10:53:19,333 [INFO] 40%|#### | 4/10 [00:16<00:23, 3.98s/it] 2023-08-30 10:53:23,215 [INFO] 50%|##### | 5/10 [00:20<00:19, 3.94s/it] 2023-08-30 10:53:27,395 [INFO] 60%|###### | 6/10 [00:24<00:16, 4.02s/it] 2023-08-30 10:53:31,446 [INFO] 70%|####### | 7/10 [00:28<00:12, 4.03s/it] 2023-08-30 10:53:35,504 [INFO] 80%|######## | 8/10 [00:32<00:08, 4.04s/it] 2023-08-30 10:53:39,484 [INFO] 90%|######### | 9/10 [00:36<00:04, 4.02s/it] 2023-08-30 10:53:43,542 [INFO] 100%|##########| 10/10 [00:40<00:00, 4.03s/it] 2023-08-30 10:53:43,543 [INFO] 100%|##########| 10/10 [00:40<00:00, 4.04s/it] total segmentation time: 40.487443685531616s
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:56: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=0_masked.tif is a low contrast image
imsave(f"{basedir}/{base_name}_masked.tif", masked_image.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:60: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=0_mask.tif is a low contrast image
imsave(f"{basedir}/{base_name}_mask.tif", mask.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:56: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=1_masked.tif is a low contrast image
imsave(f"{basedir}/{base_name}_masked.tif", masked_image.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:60: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=1_mask.tif is a low contrast image
imsave(f"{basedir}/{base_name}_mask.tif", mask.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:56: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=2_masked.tif is a low contrast image
imsave(f"{basedir}/{base_name}_masked.tif", masked_image.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:60: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=2_mask.tif is a low contrast image
imsave(f"{basedir}/{base_name}_mask.tif", mask.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:56: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=3_masked.tif is a low contrast image
imsave(f"{basedir}/{base_name}_masked.tif", masked_image.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:60: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=3_mask.tif is a low contrast image
imsave(f"{basedir}/{base_name}_mask.tif", mask.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:56: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=4_masked.tif is a low contrast image
imsave(f"{basedir}/{base_name}_masked.tif", masked_image.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:60: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=4_mask.tif is a low contrast image
imsave(f"{basedir}/{base_name}_mask.tif", mask.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:56: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=5_masked.tif is a low contrast image
imsave(f"{basedir}/{base_name}_masked.tif", masked_image.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:60: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=5_mask.tif is a low contrast image
imsave(f"{basedir}/{base_name}_mask.tif", mask.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:56: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=6_masked.tif is a low contrast image
imsave(f"{basedir}/{base_name}_masked.tif", masked_image.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:60: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=6_mask.tif is a low contrast image
imsave(f"{basedir}/{base_name}_mask.tif", mask.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:56: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=7_masked.tif is a low contrast image
imsave(f"{basedir}/{base_name}_masked.tif", masked_image.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:60: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=7_mask.tif is a low contrast image
imsave(f"{basedir}/{base_name}_mask.tif", mask.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:56: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=8_masked.tif is a low contrast image
imsave(f"{basedir}/{base_name}_masked.tif", masked_image.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:60: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=8_mask.tif is a low contrast image
imsave(f"{basedir}/{base_name}_mask.tif", mask.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:56: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=9_masked.tif is a low contrast image
imsave(f"{basedir}/{base_name}_masked.tif", masked_image.astype(np.uint16))
C:\Users\MicrobeJ\AppData\Local\Temp\ipykernel_15928\2410183771.py:60: UserWarning: C:\Users\MicrobeJ\Downloads\omnipose_multichannel\sequence/MAX.tif - T=9_mask.tif is a low contrast image
imsave(f"{basedir}/{base_name}_mask.tif", mask.astype(np.uint16))
In [ ]:
from cellpose_omni import plot
import omnipose

# Display the Omnipose segmentation results for every processed image.
# (`n` is a plain range, so iterating it directly gives the image index.)
for i in n:
    mask_i = masks[i]          # label mask for image i
    boundary_i = flows[i][-1]  # predicted boundary map
    flow_rgb_i = flows[i][0]   # RGB-encoded flow field

    # Size the output figure from the mask dimensions and the current DPI
    # so it roughly matches the resolution of the images.
    scale = 10
    dpi = mpl.rcParams['figure.dpi']
    width_in = mask_i.shape[-1] / dpi * scale
    height_in = mask_i.shape[-2] / dpi * scale

    fig = plt.figure(figsize=(height_in, width_in * 4))
    fig.patch.set_facecolor([0] * 4)  # fully transparent figure background
    plot.show_segmentation(fig, omnipose.utils.normalize99(imgs[i]),
                           mask_i, flow_rgb_i, boundary_i,
                           channels=chans, omni=True)
    plt.tight_layout()
    plt.show()